# By Hasan Korre
# The goals / steps of this project are implemented in the cells below.
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from moviepy.editor import VideoFileClip
%matplotlib inline
def grab_video_frame(filepath_, frame_):
    """Open the video at `filepath_` and return the frame for `frame_`.

    `frame_` is handed straight to clip.get_frame — presumably a time in
    seconds per moviepy's API; TODO confirm against callers.
    """
    return VideoFileClip(filepath_).get_frame(frame_)
# image should be RGB
def save_image(img_, filepath_):
    """Write the RGB image array `img_` to disk at `filepath_`."""
    mpimg.imsave(filepath_, img_)
## try it ######
# Pull one frame from the challenge clip and preview it.
frame_num = 10
test_frame = grab_video_frame('challenge_video.mp4', frame_num)
plt.imshow(test_frame)
'''
SAVE_FOLDER = 'challenge_frames/'
save_name = SAVE_FOLDER + 'challenge_{}.jpg'.format(frame_num)
save_image(test_frame, save_name)
'''
print('Success: Defined functions to grab video frames.')
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
# Read in a calibration image
img = mpimg.imread('camera_cal/calibration2.jpg')
plt.imshow(img)
# Arrays to store object points and image points from all the images
obj_points = [] #3D points in real world space
img_points = [] #2D point in image plane
# Prepare object points, like (0,0,0), (1,0,0), (2,0,0), (8,5,0)
nx = 9 #num of inside corners in x
ny = 6 #num of inside corners in y
objp = np.zeros((ny*nx,3), np.float32)
# NOTE(review): the 0:9 / 0:6 literals duplicate nx / ny — keep them in sync.
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Convert image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray,(nx, ny), None)
# If corners are found, add object points and image points
if ret == True:
    img_points.append(corners)
    obj_points.append(objp)
    # Draw and display the corners
    cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
    plt.imshow(img)
import glob
# Read in and make a list of calibration image
images = glob.glob('camera_cal/calibration*.jpg')
# Arrays to store object points and image points from all the images
obj_points = [] #3D points in real world space
img_points = [] #2D point in image plane
# Prepare object points, like (0,0,0), (1,0,0), (2,0,0), (8,5,0)
nx = 9 #num of inside corners in x
ny = 6 #num of inside corners in y
objp = np.zeros((ny*nx,3), np.float32)
# NOTE(review): the 0:9 / 0:6 literals duplicate nx / ny — keep them in sync.
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
for filename in images:
    # read in each image
    img = mpimg.imread(filename)
    # Convert image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray,(nx, ny), None)
    # If corners are found, add object points and image points
    if ret == True:
        img_points.append(corners)
        obj_points.append(objp)
# Calibrate the camera
# (uses the size of the last image read; assumes all calibration images match)
img_size = (img.shape[1], img.shape[0])
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, img_size, None, None)
print('Success: Calibrated camera.')
# Undistortion function
# mtx_ = camera matrix
# dist_ = distortion coefficients
def cal_undistort(img_, mtx_, dist_):
    """Undistort `img_` using the camera matrix and distortion coefficients."""
    undistorted = cv2.undistort(img_, mtx_, dist_, None, mtx_)
    return undistorted
# Test
# Sanity-check the calibration by undistorting one checkerboard image.
img_original = mpimg.imread('camera_cal/calibration5.jpg')
img_undistort = cal_undistort(img_original, mtx, dist)
# Display
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img_original)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(img_undistort)
ax2.set_title('Undistorted Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
%matplotlib inline
print('Success: Imports done.')
# Load a road test image and undistort it with the calibration above.
test_img = mpimg.imread('test_images/test3.jpg')
# Display the image
plt.imshow(test_img)
'''
run it...
'''
test_img_undistort = cal_undistort(test_img, mtx, dist)
# Display the image
plt.imshow(test_img_undistort)
# Concept based on work by Vivek Yadav
# "Robust lane finding using advanced computer vision techniques: Mid project update" (Medium)
def apply_color_mask(image_hsv_, hsv_low_, hsv_high_):
    """Black out every pixel whose HSV values fall outside [hsv_low_, hsv_high_].

    Returns a copy of `image_hsv_` with out-of-range pixels set to [0,0,0].
    """
    img_masked = np.copy(image_hsv_)
    # BUG FIX: channels 1 and 2 previously compared channel 0 against their
    # upper bounds; each channel must be tested against its own range.
    out_of_range = ((image_hsv_[:,:,0] < hsv_low_[0]) | (image_hsv_[:,:,0] > hsv_high_[0])) \
                 | ((image_hsv_[:,:,1] < hsv_low_[1]) | (image_hsv_[:,:,1] > hsv_high_[1])) \
                 | ((image_hsv_[:,:,2] < hsv_low_[2]) | (image_hsv_[:,:,2] > hsv_high_[2]))
    img_masked[out_of_range] = [0,0,0]
    return img_masked
def apply_yellow_mask(img_):
    """Keep only yellow-ish pixels of an RGB image (HSV thresholding)."""
    low = np.array([ 0, 80, 200])
    high = np.array([40, 255, 255])
    hsv = cv2.cvtColor(img_, cv2.COLOR_RGB2HSV)
    return cv2.cvtColor(apply_color_mask(hsv, low, high), cv2.COLOR_HSV2RGB)
def apply_white_mask(img_):
    """Keep only white-ish pixels of an RGB image (HSV thresholding)."""
    low = np.array([ 20, 0, 200])
    high = np.array([255, 80, 255])
    hsv = cv2.cvtColor(img_, cv2.COLOR_RGB2HSV)
    return cv2.cvtColor(apply_color_mask(hsv, low, high), cv2.COLOR_HSV2RGB)
def combine_color_masks(img_):
    """Binary (0/1) mask of pixels passing either the yellow or white mask."""
    yellow_gray = cv2.cvtColor(apply_yellow_mask(img_), cv2.COLOR_RGB2GRAY)
    white_gray = cv2.cvtColor(apply_white_mask(img_), cv2.COLOR_RGB2GRAY)
    combined = np.zeros_like(yellow_gray)
    combined[(yellow_gray > 0) | (white_gray > 0)] = 1
    return combined
'''
run it...
'''
# Exercise the color masks on the undistorted test frame and plot them.
test_yellow_mask = apply_yellow_mask(test_img_undistort)
test_white_mask = apply_white_mask(test_img_undistort)
test_color_masks = combine_color_masks(test_img_undistort)
# Display
f1, (a11, a12) = plt.subplots(1, 2, figsize=(8, 6))
f1.tight_layout()
a11.imshow(test_img_undistort)
a11.set_title('Original Image', fontsize=15)
a12.imshow(test_color_masks, cmap='gray')
a12.set_title('Combined Masks', fontsize=15)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
f2, (a21, a22) = plt.subplots(1, 2, figsize=(8, 6))
f2.tight_layout()
a21.imshow(test_yellow_mask)
a21.set_title('Yellow Mask', fontsize=15)
a22.imshow(test_white_mask)
a22.set_title('White Mask', fontsize=15)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Convert to HLS color space and separate the L & S channels
def rgb_to_l(img_):
    """Return the L (lightness) channel of the HLS representation of img_."""
    return cv2.cvtColor(img_, cv2.COLOR_RGB2HLS)[:,:,1]
def rgb_to_s(img_):
    """Return the S (saturation) channel of the HLS representation of img_."""
    return cv2.cvtColor(img_, cv2.COLOR_RGB2HLS)[:,:,2]
'''
run it...
'''
# Extract and display the HLS L and S channels of the test frame.
test_l_channel = rgb_to_l(test_img_undistort)
test_s_channel = rgb_to_s(test_img_undistort)
# Display
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(test_l_channel, cmap='gray')
ax1.set_title('L-Channel', fontsize=50)
ax2.imshow(test_s_channel, cmap='gray')
ax2.set_title('S-Channel', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Define a function that takes an image, gradient orientation,
# and threshold min / max values.
def abs_sobel_thresh(img_, orient_='x', thresh_min_=0, thresh_max_=255):
    """Binary image where |Sobel gradient| along `orient_` lies in the thresholds.

    Raises:
        ValueError: if orient_ is not 'x' or 'y' (previously this raised an
        opaque UnboundLocalError on `abs_sobel`).
    """
    if orient_ == 'x':
        abs_sobel = np.absolute(cv2.Sobel(img_, cv2.CV_64F, 1, 0))
    elif orient_ == 'y':
        abs_sobel = np.absolute(cv2.Sobel(img_, cv2.CV_64F, 0, 1))
    else:
        raise ValueError("orient_ must be 'x' or 'y', got {!r}".format(orient_))
    # Rescale back to 8-bit
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel >= thresh_min_) & (scaled_sobel <= thresh_max_)] = 1
    return binary_output
# Define a function to return the magnitude of the gradient
# for a given sobel kernel size and threshold values
def mag_thresh(img_, sobel_kernel_=3, mag_thresh_=(0, 255)):
    """Binary image where the 8-bit-scaled gradient magnitude lies in mag_thresh_."""
    gx = cv2.Sobel(img_, cv2.CV_64F, 1, 0, ksize=sobel_kernel_)
    gy = cv2.Sobel(img_, cv2.CV_64F, 0, 1, ksize=sobel_kernel_)
    magnitude = np.sqrt(gx**2 + gy**2)
    # rescale to 8-bit before thresholding
    magnitude = np.uint8(255*magnitude/np.max(magnitude))
    lo, hi = mag_thresh_
    binary = np.zeros_like(magnitude)
    binary[(magnitude >= lo) & (magnitude <= hi)] = 1
    return binary
# Define a function to threshold an image for a given range and Sobel kernel
def dir_threshold(img_, sobel_kernel_=3, thresh_=(0, np.pi/2)):
    """Binary image where the absolute gradient direction lies within thresh_."""
    gx = cv2.Sobel(img_, cv2.CV_64F, 1, 0, ksize=sobel_kernel_)
    gy = cv2.Sobel(img_, cv2.CV_64F, 0, 1, ksize=sobel_kernel_)
    direction = np.arctan2(np.absolute(gy), np.absolute(gx))
    lo, hi = thresh_
    binary = np.zeros_like(direction)
    binary[(direction >= lo) & (direction <= hi)] = 1
    return binary
def sobel_complex_combine(image_):
    """Combine x/y gradient, magnitude, and direction thresholds into one binary."""
    gx = abs_sobel_thresh(image_, orient_='x', thresh_min_=20, thresh_max_=100)
    gy = abs_sobel_thresh(image_, orient_='y', thresh_min_=20, thresh_max_=100)
    mag_binary = mag_thresh(image_, sobel_kernel_=3, mag_thresh_=(30, 100))
    dir_binary = dir_threshold(image_, sobel_kernel_=15, thresh_=(0.7, 1.3))
    # a pixel passes when both x and y gradients fire, or both mag and dir do
    both_xy = (gx == 1) & (gy == 1)
    mag_and_dir = (mag_binary == 1) & (dir_binary == 1)
    combined = np.zeros_like(dir_binary)
    combined[both_xy | mag_and_dir] = 1
    return combined.astype(np.uint8)
'''
run it...
'''
# L-Channel
# Run each Sobel threshold on the L channel and plot the results.
test_abs_sobelx = abs_sobel_thresh(test_l_channel, orient_='x', thresh_min_=20, thresh_max_=100)
test_abs_sobely = abs_sobel_thresh(test_l_channel, orient_='y', thresh_min_=20, thresh_max_=100)
test_mag_sobel = mag_thresh(test_l_channel, sobel_kernel_=3, mag_thresh_=(30, 100))
test_dir_sobel = dir_threshold(test_l_channel, sobel_kernel_=15, thresh_=(0.7, 1.3))
test_combine_sobel_L = sobel_complex_combine(test_l_channel)
# Display
f1, (a11, a12) = plt.subplots(1, 2, figsize=(8, 6))
f1.tight_layout()
a11.imshow(test_img)
a11.set_title('Original Image', fontsize=15)
a12.imshow(test_combine_sobel_L, cmap='gray')
a12.set_title('Complex Combined', fontsize=15)
f2, (a21, a22) = plt.subplots(1, 2, figsize=(8, 6))
f2.tight_layout()
a21.imshow(test_abs_sobelx, cmap='gray')
a21.set_title('Abs SobelX', fontsize=15)
a22.imshow(test_abs_sobely, cmap='gray')
a22.set_title('Abs SobelY', fontsize=15)
f3, (a31, a32) = plt.subplots(1, 2, figsize=(8, 6))
f3.tight_layout()
a31.imshow(test_mag_sobel, cmap='gray')
a31.set_title('Sobel Mag', fontsize=15)
a32.imshow(test_dir_sobel, cmap='gray')
a32.set_title('Sobel Dir', fontsize=15)
'''
run it...
'''
# S-Channel
# Run each Sobel threshold on the S channel and plot the results.
test_abs_sobelx = abs_sobel_thresh(test_s_channel, orient_='x', thresh_min_=20, thresh_max_=100)
test_abs_sobely = abs_sobel_thresh(test_s_channel, orient_='y', thresh_min_=20, thresh_max_=100)
test_mag_sobel = mag_thresh(test_s_channel, sobel_kernel_=3, mag_thresh_=(30, 100))
test_dir_sobel = dir_threshold(test_s_channel, sobel_kernel_=15, thresh_=(0.7, 1.3))
test_combine_sobel_S = sobel_complex_combine(test_s_channel)
# Display
f1, (a11, a12) = plt.subplots(1, 2, figsize=(8, 6))
f1.tight_layout()
a11.imshow(test_img)
a11.set_title('Original Image', fontsize=15)
a12.imshow(test_combine_sobel_S, cmap='gray')
a12.set_title('Complex Combined', fontsize=15)
f2, (a21, a22) = plt.subplots(1, 2, figsize=(8, 6))
f2.tight_layout()
a21.imshow(test_abs_sobelx, cmap='gray')
a21.set_title('Abs SobelX', fontsize=15)
a22.imshow(test_abs_sobely, cmap='gray')
a22.set_title('Abs SobelY', fontsize=15)
f3, (a31, a32) = plt.subplots(1, 2, figsize=(8, 6))
f3.tight_layout()
a31.imshow(test_mag_sobel, cmap='gray')
a31.set_title('Sobel Mag', fontsize=15)
a32.imshow(test_dir_sobel, cmap='gray')
a32.set_title('Sobel Dir', fontsize=15)
def combine_threshold(color_thresh_, combine_sobel_l_, combine_sobel_s_):
    """Merge the color mask with both Sobel binaries.

    Returns:
        color_binary: 3-channel stack (L-sobel, S-sobel, color), scaled to 255
            so each contribution is visible when plotted as RGB.
        combined_binary: single-channel 0/1 union of all three masks.
    """
    color_binary = np.dstack((combine_sobel_l_,
                              combine_sobel_s_,
                              color_thresh_))
    color_binary[color_binary > 0.5] = 255 #help plotting of rgb
    any_mask_on = (color_thresh_ == 1) | (combine_sobel_l_ == 1) | (combine_sobel_s_ == 1)
    combined_binary = np.zeros_like(color_thresh_)
    combined_binary[any_mask_on] = 1
    return color_binary, combined_binary
'''
run it...
'''
# Concept of separate L and S sobel based on work by Vivek Yadav
# "Robust lane finding using advanced computer vision techniques: Mid project update" (Medium)
color_binary, combined_binary = combine_threshold(test_color_masks, test_combine_sobel_L, test_combine_sobel_S)
# Plotting thresholded images
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.set_title('Stacked thresholds (green=sobelx, blue=s_channel)')
ax1.imshow(color_binary)
ax2.set_title('Combined S-channel and Sobel thresholds')
ax2.imshow(combined_binary, cmap='gray')
# Global perspective-warp quadrilaterals, refreshed per image size.
warp_src = np.float32([[0,0],[0,0],[0,0],[0,0]])
warp_dst = np.float32([[0,0],[0,0],[0,0],[0,0]])
X_INDEX = 0
Y_INDEX = 1
def update_warp_points(img_):
    """Recompute the global src/dst warp points as fractions of the frame size."""
    global warp_src
    global warp_dst
    height = img_.shape[0]
    width = img_.shape[1]
    # (x, y) fractions: top-left, top-right, bottom-left, bottom-right
    src_fractions = [(0.43, 0.65), (0.573, 0.65), (0.147, 0.99), (0.875, 0.99)]
    dst_fractions = [(0.35, 0.01), (0.65, 0.01), (0.35, 0.99), (0.65, 0.99)]
    for row, (fx, fy) in enumerate(src_fractions):
        warp_src[row, X_INDEX] = width*fx
        warp_src[row, Y_INDEX] = height*fy
    for row, (fx, fy) in enumerate(dst_fractions):
        warp_dst[row, X_INDEX] = width*fx
        warp_dst[row, Y_INDEX] = height*fy
def perspective_warp(img_):
    """Warp img_ to the top-down view defined by the global src/dst points."""
    update_warp_points(img_)
    transform = cv2.getPerspectiveTransform(warp_src, warp_dst)
    size = (img_.shape[1], img_.shape[0])
    return cv2.warpPerspective(img_, transform, size, flags=cv2.INTER_LINEAR)
def perspective_warp_inv(img_):
    """Warp a top-down image back to the camera view (inverse of perspective_warp)."""
    update_warp_points(img_)
    inverse_transform = cv2.getPerspectiveTransform(warp_dst, warp_src)
    size = (img_.shape[1], img_.shape[0])
    return cv2.warpPerspective(img_, inverse_transform, size, flags=cv2.INTER_LINEAR)
'''
run it...
'''
# Example of Perspective Transform
pTrans_img = mpimg.imread('test_images/solidWhiteRight.jpg')
pTrans_img_undistort = cal_undistort(pTrans_img, mtx, dist)
pTrans_warped = perspective_warp(pTrans_img_undistort)
# Plotting transformed image, with the src/dst corner points marked
f, (ax1, ax2) = plt.subplots(2, 1, figsize=(20,10))
ax1.set_title('Original: Marked Src Points')
ax1.imshow(pTrans_img_undistort)
ax1.plot(warp_src[0,0],warp_src[0,1],'.') #top left
ax1.plot(warp_src[1,0],warp_src[1,1],'.') #top right
ax1.plot(warp_src[2,0],warp_src[2,1],'.') #bottom left
ax1.plot(warp_src[3,0],warp_src[3,1],'.') #bottom right
ax2.set_title('Perspective Transform: Marked Dst Points')
ax2.imshow(pTrans_warped)
ax2.plot(warp_dst[0,0],warp_dst[0,1],'.') #top left
ax2.plot(warp_dst[1,0],warp_dst[1,1],'.') #top right
ax2.plot(warp_dst[2,0],warp_dst[2,1],'.') #bottom left
ax2.plot(warp_dst[3,0],warp_dst[3,1],'.') #bottom right
"""
Region of Interest:
Goal: Only keep marker that are in the center
"""
def region_of_interest(img_, vertices_):
    """
    Black out everything outside the polygon defined by `vertices_`.

    Handles both single-channel and multi-channel images.
    """
    mask = np.zeros_like(img_)
    if len(img_.shape) > 2:
        # multi-channel: fill with white across every channel present
        fill_color = (255,) * img_.shape[2]
    else:
        fill_color = 255
    # paint the keep-region white, then AND it with the input
    cv2.fillPoly(mask, vertices_, fill_color)
    return cv2.bitwise_and(img_, mask)
def hk_region_ofInterest(img_):
    """Mask the frame down to a trapezoid covering the road ahead."""
    height = img_.shape[0]
    width = img_.shape[1]
    TOP_FRACTION = 0.60
    RIGHT_FRACTION = 0.54
    LEFT_FRACTION = 0.44
    # [horiz, vert] corners, clockwise from top-left
    corners = [[width*LEFT_FRACTION, height*TOP_FRACTION],
               [width*RIGHT_FRACTION, height*TOP_FRACTION],
               [width, height],
               [0, height]]
    polygon = np.array(corners, np.int32)
    return region_of_interest(img_, [polygon])
'''
run it...
'''
# Apply the region-of-interest mask to the combined binary image.
test_img_roi = hk_region_ofInterest(combined_binary)
# Display the image
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.set_title('Original Image')
ax1.imshow(test_img)
ax2.set_title('Region of Interest')
ax2.imshow(test_img_roi, cmap='gray')
# http://stackoverflow.com/questions/14313510/how-to-calculate-moving-average-using-numpy
def moving_average(a, n=3):
    """Simple moving average of `a` with window `n`, via cumulative sums."""
    csum = np.cumsum(a, dtype=float)
    csum[n:] -= csum[:-n]
    return csum[n - 1:] / n
PIXELS_TO_AVG = 10 #20
NUM_BANDS = 20 #10
TRACK_THRESHOLD = 10 #20
BOX_HALF_WIDTH = 50
def find_center(histo_, left_edge_, right_edge_):
    """Center index (relative to left_edge_) of the peak in histo_[left_edge_:right_edge_]."""
    window = histo_[left_edge_:right_edge_]
    window_len = len(window)
    peak = np.argmax(window)
    # walk left until the counts fall to the tracking threshold
    idx = peak
    while (window[idx] > TRACK_THRESHOLD):
        idx -= 1
        if idx < 0:
            break
    left_bound = idx+1
    # walk right likewise
    idx = peak
    while (window[idx] > TRACK_THRESHOLD):
        idx += 1
        if idx > (window_len-1):
            break
    right_bound = idx-1
    # midpoint of the above-threshold span around the peak
    return int((left_bound+right_bound)/2)
# Find point in histogram and add to list
def find_point(histogram_, avg_height_, is_initialized_, min_edge_, max_edge_, last_center_, list_):
    """Locate one lane point in a histogram band and append (x, y) to list_.

    Returns the updated (is_initialized_, last_center_, list_) triple.
    """
    if is_initialized_:
        # already tracking: search a small window around the previous center
        left_edge = last_center_ - BOX_HALF_WIDTH
        right_edge = last_center_ + BOX_HALF_WIDTH
    else:
        # first detection: search the whole allowed span
        left_edge = min_edge_
        right_edge = max_edge_
    center_guess = find_center(histogram_, left_edge, right_edge) + left_edge
    # only accept peaks that clear the tracking threshold
    if histogram_[center_guess] > TRACK_THRESHOLD:
        list_.append((center_guess, avg_height_))
        last_center_ = center_guess
        is_initialized_ = True
    return is_initialized_, last_center_, list_
# Find points on the lane lines
def get_lane_points(img_):
    """Scan the warped binary image in horizontal bands, bottom to top, and
    collect (x, y) point lists for the left and right lane lines."""
    left_pts = []
    right_pts = []
    band_depth = int(img_.shape[0]/NUM_BANDS)
    # band boundaries, reversed so scanning starts at the image bottom
    band_tops = np.arange(0, img_.shape[0]-1, band_depth)[::-1]
    band_bottoms = band_tops + (band_depth-1)
    left_ready = False
    right_ready = False
    left_center = 0
    right_center = 0
    for top, bottom in zip(band_tops, band_bottoms):
        # smoothed column histogram of this band
        histogram = np.sum(img_[top:bottom,:], axis=0)
        histogram = moving_average(histogram, PIXELS_TO_AVG)
        hist_len = histogram.shape[0]
        band_y = (top + bottom)/2
        # left lane searches the left half, right lane the right half
        left_ready, left_center, left_pts = find_point(histogram,
                                                       band_y,
                                                       left_ready,
                                                       0,
                                                       int(hist_len/2),
                                                       left_center,
                                                       left_pts)
        right_ready, right_center, right_pts = find_point(histogram,
                                                          band_y,
                                                          right_ready,
                                                          int(hist_len/2),
                                                          hist_len-1,
                                                          right_center,
                                                          right_pts)
    return left_pts, right_pts
# Display points
def add_points(fig_, points_, marking_):
    """Plot each (x, y) tuple on axes `fig_` with marker style `marking_`."""
    for x_val, y_val in points_:
        fig_.plot(x_val, y_val, marking_, markersize=30)
'''
run it...
'''
# Warp the masked binary to a top-down view and find lane points.
test_warped = perspective_warp(test_img_roi)
left_pts, right_pts = get_lane_points(test_warped)
# Display the image
f, (ax1) = plt.subplots(1, 1, figsize=(20,10))
ax1.set_title('Original: Marked Lane Points')
ax1.imshow(test_warped, cmap='gray')
add_points(ax1, left_pts, 'b.')
add_points(ax1, right_pts, 'r.')
# Define a class to receive the characteristics of each line detection
class Line:
    """Tracks one lane line across frames.

    Detected (x, y) points are smoothed per-row with a first-order low-pass
    filter keyed by y, then fit with a second-order polynomial x = f(y).
    """
    def __init__(self, alpha_, num_output_pts_, img_height_):
        # defined constants
        ## Conversions from pixels space to meters
        self._y_mPerPix = 30/720 # meters/pixel in y dimension
        self._x_mPerPix = 3.7/700 # meteres/pixel in x dimension
        # undefined constants
        self._alpha = alpha_                    # low-pass filter gain
        self._num_output_pts = num_output_pts_  # samples generated per curve
        self._img_height = img_height_          # frame height in pixels
        # variables
        self._yMin = img_height_                # smallest y seen so far
        self._tracked_pts = {} #dict of key=yVal, value=xVal
        self._fit_coeff = [np.array([False])]   # polyfit coefficients [A, B, C]
        self._radius_of_curv = None             # radius of curvature (meters)
        return
    # points_ = list of (x,y) tuples
    def _update_pts(self, points_):
        # Blend new x values into the per-row history; first sighting of a
        # given y is stored as-is.
        for (xVal, yVal) in points_:
            if yVal in self._tracked_pts:
                # first-order low-pass filter
                self._tracked_pts[yVal] = (1-self._alpha)*self._tracked_pts[yVal] \
                                          + self._alpha*xVal
            else:
                self._tracked_pts[yVal] = xVal
        return
    def _calc_curvature(self, y_vals_, x_vals_):
        """Compute the radius of curvature (meters) at the bottom of the frame."""
        # Refit curve in meters
        y_vals_m = y_vals_*self._y_mPerPix
        x_vals_m = x_vals_*self._x_mPerPix
        fit_coeff_m = np.polyfit(y_vals_m, x_vals_m, 2)
        # f(y) = Ay^2 + By + C
        A = fit_coeff_m[0]
        B = fit_coeff_m[1]
        # NOTE(review): y here is in pixels while A and B were fit in meters —
        # this looks like it should be img_height * self._y_mPerPix; verify.
        y = self._img_height
        # R = (1+(2Ay+B)^2)^1.5/abs(2A)
        self._radius_of_curv = ((1 + (2*A*y + B)**2)**1.5) \
                               /np.absolute(2*A)
    def _fit_curve(self):
        """Refit the pixel-space polynomial from all tracked points."""
        # get x and y values
        x_list = []
        y_list = []
        for key, value in self._tracked_pts.items():
            x_list.append(value)
            y_list.append(key)
        x_vals = np.asarray(x_list)
        y_vals = np.asarray(y_list)
        self._yMin = min(y_vals)
        # Fit a second order polynomial (fit_coeff[0]*y**2 + fit_coeff[1]*y + fit_coeff[2])
        self._fit_coeff = np.polyfit(y_vals, x_vals, 2)
        # calc curvature
        self._calc_curvature(y_vals, x_vals)
    '''
    External API
    '''
    def update(self, points_):
        """Absorb new (x, y) points and refit the polynomial."""
        self._update_pts(points_)
        self._fit_curve()
    @property
    def yMin(self):
        # smallest y (highest point in the image) among tracked points
        return self._yMin
    def gen_curve_pts(self, y_min_):
        """Sample the fitted curve from y_min_ down to the image bottom."""
        self._yGen = np.linspace(y_min_, self._img_height-1, num=self._num_output_pts)
        self._xGen = self._fit_coeff[0]*self._yGen**2 \
                     + self._fit_coeff[1]*self._yGen \
                     + self._fit_coeff[2]
    @property
    def xGen(self):
        # x samples produced by the last gen_curve_pts() call
        return self._xGen
    @property
    def yGen(self):
        # y samples produced by the last gen_curve_pts() call
        return self._yGen
    @property
    def radius_of_curv(self):
        return self._radius_of_curv
    def get_xPixelPos(self, yPixelPos_):
        """Evaluate the pixel-space fit x = f(y) at yPixelPos_."""
        return self._fit_coeff[0]*yPixelPos_**2 \
               + self._fit_coeff[1]*yPixelPos_ \
               + self._fit_coeff[2]
    @property
    def x_mPerPixel(self):
        # meters per pixel in the x direction
        return self._x_mPerPix
'''
test it...
'''
# Smoke-test the Line class with the points detected above.
test_height = test_img.shape[0]
fake_line = Line(0.5, 10, test_height)
fake_line.update(right_pts)
print('y_min = {}'.format(fake_line.yMin))
fake_line.gen_curve_pts(fake_line.yMin)
print('xGen = {}'.format(fake_line.xGen))
print('')
fake_line.update(left_pts)
print('y_min = {}'.format(fake_line.yMin))
fake_line.gen_curve_pts(fake_line.yMin)
print('xGen = {}'.format(fake_line.xGen))
# give new points to the line classes
def update_lines(left_line_, right_line_, left_pts_, right_pts_):
    """Feed new points into both Line trackers, then regenerate their curves
    from a shared minimum y so both overlays span the same height."""
    left_line_.update(left_pts_)
    right_line_.update(right_pts_)
    shared_y_min = min(left_line_.yMin, right_line_.yMin)
    for line in (left_line_, right_line_):
        line.gen_curve_pts(shared_y_min)
    return left_line_, right_line_
# Display curve on figure
def add_curve(fig_, x_fit_, y_vals_):
    """Draw the fitted polynomial as a thick green line on axes `fig_`."""
    fig_.plot(x_fit_, y_vals_, color='green', linewidth=3)
'''
run it...
'''
# Fit curves through the detected points and draw them on the warped view.
LINE_ALPHA = 0.5
LINE_NUM_PTS = 10
TEST_HEIGHT = test_img.shape[0]
test_left_line = Line(LINE_ALPHA, LINE_NUM_PTS, TEST_HEIGHT)
test_right_line = Line(LINE_ALPHA, LINE_NUM_PTS, TEST_HEIGHT)
test_left_line, test_right_line = update_lines(test_left_line,
                                               test_right_line,
                                               left_pts,
                                               right_pts)
# Display the image
f, (ax1) = plt.subplots(1, 1, figsize=(20,10))
ax1.set_title('Original: Marked Lane Points')
ax1.imshow(test_warped, cmap='gray')
add_points(ax1, left_pts, 'b.')
add_points(ax1, right_pts, 'r.')
add_curve(ax1, test_left_line.xGen, test_left_line.yGen)
add_curve(ax1, test_right_line.xGen, test_right_line.yGen)
def path_overlay(rgb_img_, left_fitx_, left_yvals_, right_fitx_, right_yvals_):
    """Return a blank RGB image (same size as rgb_img_) with the lane polygon
    between the two fitted curves filled in green."""
    blank = np.zeros_like(rgb_img_[:,:,0]).astype(np.uint8)
    color_warp = np.dstack((blank, blank, blank))
    # left boundary top-to-bottom, right boundary reversed, so the polygon
    # outline is traced in order for cv2.fillPoly()
    left_boundary = np.array([np.transpose(np.vstack([left_fitx_, left_yvals_]))])
    right_boundary = np.array([np.flipud(np.transpose(np.vstack([right_fitx_, right_yvals_])))])
    polygon = np.hstack((left_boundary, right_boundary))
    return cv2.fillPoly(color_warp, np.int_([polygon]), (0,255, 0))
'''
run it...
'''
# generate overlays
overlay_warped = path_overlay(test_img_undistort, test_left_line.xGen, test_left_line.yGen,
                              test_right_line.xGen, test_right_line.yGen)
overlay_unwarped = perspective_warp_inv(overlay_warped)
# put overlays on images
test_warped_rgb = perspective_warp(test_img_undistort)
test_warped_marked = cv2.addWeighted(test_warped_rgb, 1, overlay_warped, 0.3, 0)
test_img_marked = cv2.addWeighted(test_img_undistort, 1, overlay_unwarped, 0.3, 0)
# Display the image
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.set_title('Overlay in Warped Image')
ax1.imshow(test_warped_marked)
ax2.set_title('Overlay in Original Image')
ax2.imshow(test_img_marked)
# Shared cv2.putText styling for the on-frame annotations.
TEXT_FONT = cv2.FONT_HERSHEY_SIMPLEX
TEXT_SIZE = 1.5
TEXT_COLOR = (255,255,255)
TEXT_THICKNESS = 5
def avg_curvature(left_curvature_, right_curvature_):
    """Mean of the two lane-line curvature radii."""
    total = left_curvature_ + right_curvature_
    return total/2
def display_curvature(img_, left_line_, right_line_):
    """Return a copy of img_ annotated with the mean radius of curvature."""
    CURVATURE_TEXT_POSITION = (50,75)
    mean_radius = avg_curvature(left_line_.radius_of_curv,
                                right_line_.radius_of_curv)
    label = "radius of curvature = {:.2E}[m]".format(mean_radius)
    annotated = np.copy(img_)
    return cv2.putText(annotated, label, CURVATURE_TEXT_POSITION,
                       TEXT_FONT, TEXT_SIZE, TEXT_COLOR, TEXT_THICKNESS)
'''
run it...
'''
# Annotate the marked image with the average radius of curvature.
test_text_curv = display_curvature(test_img_marked, test_left_line, test_right_line)
# Display the image
plt.imshow(test_text_curv)
def calc_position(img_height_, img_width_, left_line_, right_line_):
    """Vehicle offset from lane center in meters (image center minus lane center)."""
    # lane edges at the bottom of the frame, in pixels
    bottom_left_x = left_line_.get_xPixelPos(img_height_)
    bottom_right_x = right_line_.get_xPixelPos(img_height_)
    lane_center_pixel = (bottom_left_x + bottom_right_x)/2
    offset_pixels = (img_width_/2) - lane_center_pixel
    return offset_pixels * left_line_.x_mPerPixel
def display_position(img_, left_line_, right_line_):
    """Return a copy of img_ annotated with the vehicle's offset from lane center."""
    POS_TEXT_POSITION = (50,150)
    offset_m = calc_position(img_.shape[0], img_.shape[1], left_line_, right_line_)
    label = "vehicle is {:.2f}m wrt center".format(offset_m)
    annotated = np.copy(img_)
    return cv2.putText(annotated, label, POS_TEXT_POSITION,
                       TEXT_FONT, TEXT_SIZE, TEXT_COLOR, TEXT_THICKNESS)
'''
run it...
'''
# Annotate the frame with the vehicle position as well.
test_text_pos = display_position(test_text_curv, test_left_line, test_right_line)
# Display the image
plt.imshow(test_text_pos)
def process_image(image_, left_line_, right_line_, num_overlay_pts_=10):
    """Full lane-detection pipeline for one RGB frame.

    Updates the passed-in Line trackers in place and returns the frame
    annotated with the lane overlay, curvature, and vehicle position.
    """
    # undistort images
    img_undistort = cal_undistort(image_, mtx, dist)
    # color selection and threshold
    color_masks = combine_color_masks(img_undistort)
    # sobel transforms and thresholds, on both the L and S channels
    l_channel = rgb_to_l(img_undistort)
    combine_sobel_L = sobel_complex_combine(l_channel)
    s_channel = rgb_to_s(img_undistort)
    combine_sobel_S = sobel_complex_combine(s_channel)
    # Combine color and sobels
    _, combined_binary = combine_threshold(color_masks, combine_sobel_L, combine_sobel_S)
    # Mask region of interest
    img_roi = hk_region_ofInterest(combined_binary)
    # Perspective Transform
    img_warped = perspective_warp(img_roi)
    # Grab points from image
    left_points, right_points = get_lane_points(img_warped)
    # Update line classes
    left_line_, right_line_ = update_lines(left_line_, right_line_,
                                           left_points, right_points)
    # generate overlays
    # BUG FIX: previously read the module-level `right_line` instead of the
    # `right_line_` parameter, silently using state from an unrelated object.
    warped_overlay = path_overlay(img_undistort, left_line_.xGen, left_line_.yGen,
                                  right_line_.xGen, right_line_.yGen)
    unwarped_overlay = perspective_warp_inv(warped_overlay)
    # put overlays on images
    img_weighted = cv2.addWeighted(img_undistort, 1, unwarped_overlay, 0.3, 0)
    text_curv = display_curvature(img_weighted, left_line_, right_line_)
    text_pos = display_position(text_curv, left_line_, right_line_)
    return text_pos
print('Success: process_image() function defined.')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
%matplotlib inline
# Run the single-image pipeline on every file in test_images/, two per row.
is_left = True
img_names = os.listdir('test_images/')
for index, name in enumerate(img_names):
    image = mpimg.imread('test_images/' + name)
    SINGLE_FRAME_HEIGHT = image.shape[0]
    # fresh Line trackers per image: no state carries between stills
    left_line = Line(LINE_ALPHA, LINE_NUM_PTS, SINGLE_FRAME_HEIGHT)
    right_line = Line(LINE_ALPHA, LINE_NUM_PTS, SINGLE_FRAME_HEIGHT)
    image = process_image(image, left_line, right_line)
    # alternate between the left and right subplot slots
    if is_left:
        fig = plt.figure(figsize=(8, 6))
        a=fig.add_subplot(1,2,1)
        is_left = False
    else:
        a=fig.add_subplot(1,2,2)
        is_left = True
    a.set_title(name)
    plt.imshow(image)
# VideoProcessor Class
class VideoProcessor:
    """Owns the persistent left/right Line trackers used across video frames."""
    def __init__(self, alpha_, num_output_pts_, img_height_):
        self._left_line = Line(alpha_, num_output_pts_, img_height_)
        self._right_line = Line(alpha_, num_output_pts_, img_height_)
    '''
    External API
    '''
    def process_images_multiple(self, image_):
        """Run the full pipeline on one frame, carrying line state over."""
        return process_image(image_, self._left_line, self._right_line)
print('Success: VideoProcessor class defined.')
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_video(input_filename_, output_filename_):
    """Run the lane pipeline over every frame of a video and save the result."""
    # Grab the video
    clip = VideoFileClip(input_filename_)
    first_frame = clip.get_frame(0)
    # Process the frames
    VIDEO_HEIGHT = first_frame.shape[0]
    video_processor = VideoProcessor(LINE_ALPHA, LINE_NUM_PTS, VIDEO_HEIGHT)
    processed_clip = clip.fl_image(video_processor.process_images_multiple) #NOTE: this function expects color images!!
    # Save the video
    %time processed_clip.write_videofile(output_filename_, audio=False)
print('Success: process_video() function defined.')
# Render and embed the three project videos.
process_video('project_video.mp4', 'project_soln.mp4')
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format('project_soln.mp4'))
process_video('challenge_video.mp4', 'challenge_soln.mp4')
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format('challenge_soln.mp4'))
process_video('harder_challenge_video.mp4', 'harder_challenge_soln.mp4')
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format('harder_challenge_soln.mp4'))